bitkeeper revision 1.1192.1.1 (420d73e1O792fPGDMcGMeH_RVU_ZZQ)
author iap10@freefall.cl.cam.ac.uk <iap10@freefall.cl.cam.ac.uk>
Sat, 12 Feb 2005 03:11:29 +0000 (03:11 +0000)
committer iap10@freefall.cl.cam.ac.uk <iap10@freefall.cl.cam.ac.uk>
Sat, 12 Feb 2005 03:11:29 +0000 (03:11 +0000)
Rename arch.pagetable to arch.guest_table

xen/arch/x86/dom0_ops.c
xen/arch/x86/domain.c
xen/arch/x86/mm.c
xen/arch/x86/smpboot.c
xen/arch/x86/traps.c
xen/arch/x86/vmx.c
xen/arch/x86/vmx_vmcs.c
xen/arch/x86/x86_32/domain_build.c
xen/arch/x86/x86_64/domain_build.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/shadow.h

index 9e7ec8c31b73d72d22b49752f9c07c31d331ce5f..0f455c6800f1854089c1b59d7898fc4ee81e7204 100644 (file)
@@ -348,16 +348,21 @@ void arch_getdomaininfo_ctxt(
     struct exec_domain *ed, full_execution_context_t *c)
 { 
     int i;
-    unsigned long vmx_domain = ed->arch.arch_vmx.flags;
+#ifdef __i386__  /* Remove when x86_64 VMX is implemented */
+    unsigned long vmx_domain;
     extern void save_vmx_execution_context(execution_context_t *);
+#endif
 
     c->flags = 0;
     memcpy(&c->cpu_ctxt, 
            &ed->arch.user_ctxt,
            sizeof(ed->arch.user_ctxt));
 
+#ifdef __i386__
+    vmx_domain = ed->arch.arch_vmx.flags;
     if (vmx_domain)
         save_vmx_execution_context(&c->cpu_ctxt);
+#endif
 
     if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
         c->flags |= ECF_I387_VALID;
@@ -390,7 +395,7 @@ void arch_getdomaininfo_ctxt(
     c->kernel_ss  = ed->arch.kernel_ss;
     c->kernel_esp = ed->arch.kernel_sp;
     c->pt_base   = 
-        pagetable_val(ed->arch.pagetable);
+        pagetable_val(ed->arch.guest_table);
     memcpy(c->debugreg, 
            ed->arch.debugreg, 
            sizeof(ed->arch.debugreg));
index a4ccdf21bc8ca438c0c1adfd41bb5fe66f489030..ed987e15032b8631f91780f53744996126109366 100644 (file)
@@ -467,8 +467,8 @@ int arch_final_setup_guest(
     d->arch.failsafe_address  = c->failsafe_callback_eip;
     
     phys_basetab = c->pt_base;
-    d->arch.pagetable = mk_pagetable(phys_basetab);
-    d->arch.phys_table = d->arch.pagetable;
+    d->arch.guest_table = mk_pagetable(phys_basetab);
+    d->arch.phys_table = d->arch.guest_table;
     if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain, 
                             PGT_base_page_table) )
         return -EINVAL;
@@ -658,7 +658,7 @@ long do_switch_to_user(void)
     struct exec_domain    *ed = current;
 
     if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) ||
-         unlikely(pagetable_val(ed->arch.pagetable_user) == 0) )
+         unlikely(pagetable_val(ed->arch.guest_table_user) == 0) )
         return -EFAULT;
 
     ed->arch.flags &= ~TF_kernel_mode;
@@ -947,19 +947,19 @@ void domain_relinquish_memory(struct domain *d)
     /* Drop the in-use references to page-table bases. */
     for_each_exec_domain ( d, ed )
     {
-        if ( pagetable_val(ed->arch.pagetable) != 0 )
+        if ( pagetable_val(ed->arch.guest_table) != 0 )
         {
             put_page_and_type(
-                &frame_table[pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT]);
-            ed->arch.pagetable = mk_pagetable(0);
+                &frame_table[pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT]);
+            ed->arch.guest_table = mk_pagetable(0);
         }
 
-        if ( pagetable_val(ed->arch.pagetable_user) != 0 )
+        if ( pagetable_val(ed->arch.guest_table_user) != 0 )
         {
             put_page_and_type(
-                &frame_table[pagetable_val(ed->arch.pagetable_user) >>
+                &frame_table[pagetable_val(ed->arch.guest_table_user) >>
                             PAGE_SHIFT]);
-            ed->arch.pagetable_user = mk_pagetable(0);
+            ed->arch.guest_table_user = mk_pagetable(0);
         }
     }
 
index 123ff0fbda8a94a65aa8cad087c31f937670c43f..09819b958ca16768593c9b0ef338f14e556c86ba 100644 (file)
@@ -205,16 +205,16 @@ void write_ptbase(struct exec_domain *ed)
               pagetable_val(ed->arch.monitor_table) :
               pagetable_val(ed->arch.shadow_table));
     else
-        pa = pagetable_val(ed->arch.pagetable);
+        pa = pagetable_val(ed->arch.guest_table);
 #else
     if ( unlikely(shadow_mode(d)) )
         pa = pagetable_val(ed->arch.shadow_table);    
 #ifdef __x86_64__
     else if ( !(ed->arch.flags & TF_kernel_mode) )
-        pa = pagetable_val(ed->arch.pagetable_user);
+        pa = pagetable_val(ed->arch.guest_table_user);
 #endif
     else
-        pa = pagetable_val(ed->arch.pagetable);
+        pa = pagetable_val(ed->arch.guest_table);
 #endif
 
     write_cr3(pa);
@@ -1249,8 +1249,8 @@ int new_guest_cr3(unsigned long pfn)
         invalidate_shadow_ldt(ed);
 
         percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
-        old_base_pfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
-        ed->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+        old_base_pfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
+        ed->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
 
         shadow_mk_pagetable(ed);
 
@@ -1355,8 +1355,8 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
         else
         {
             unsigned long old_pfn =
-                pagetable_val(ed->arch.pagetable_user) >> PAGE_SHIFT;
-            ed->arch.pagetable_user = mk_pagetable(pfn << PAGE_SHIFT);
+                pagetable_val(ed->arch.guest_table_user) >> PAGE_SHIFT;
+            ed->arch.guest_table_user = mk_pagetable(pfn << PAGE_SHIFT);
             if ( old_pfn != 0 )
                 put_page_and_type(&frame_table[old_pfn]);
         }
@@ -1676,7 +1676,7 @@ int do_mmu_update(
     cleanup_writable_pagetable(d);
 
     if ( unlikely(shadow_mode(d)) )
-        check_pagetable(d, ed->arch.pagetable, "pre-mmu"); /* debug */
+        check_pagetable(d, ed->arch.guest_table, "pre-mmu"); /* debug */
 
     /*
      * If we are resuming after preemption, read how much work we have already
@@ -1924,7 +1924,7 @@ int do_mmu_update(
         __put_user(done + i, pdone);
 
     if ( unlikely(shadow_mode(d)) )
-        check_pagetable(d, ed->arch.pagetable, "post-mmu"); /* debug */
+        check_pagetable(d, ed->arch.guest_table, "post-mmu"); /* debug */
 
     UNLOCK_BIGLOCK(d);
     return rc;
@@ -1983,7 +1983,7 @@ int do_update_va_mapping(unsigned long va,
         if ( shadow_mode(d) == SHM_logdirty )
             mark_dirty(d, va_to_l1mfn(va));
   
-        check_pagetable(d, ed->arch.pagetable, "va"); /* debug */
+        check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
     }
 
     deferred_ops = percpu_info[cpu].deferred_ops;
@@ -2645,7 +2645,7 @@ void audit_domain(struct domain *d)
     synchronise_pagetables(~0UL);
 
     printk("pt base=%lx sh_info=%x\n",
-           pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT,
+           pagetable_val(d->exec_domain[0]->arch.guest_table)>>PAGE_SHIFT,
            virt_to_page(d->shared_info)-frame_table);
            
     spin_lock(&d->page_alloc_lock);
@@ -2694,8 +2694,8 @@ void audit_domain(struct domain *d)
 
 
     /* PHASE 1 */
-    if ( pagetable_val(d->exec_domain[0]->arch.pagetable) )
-        adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.pagetable)
+    if ( pagetable_val(d->exec_domain[0]->arch.guest_table) )
+        adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.guest_table)
                            >>PAGE_SHIFT], -1, 1);
 
     list_ent = d->page_list.next;
@@ -2952,9 +2952,9 @@ void audit_domain(struct domain *d)
 
     spin_unlock(&d->page_alloc_lock);
 
-    if ( pagetable_val(d->exec_domain[0]->arch.pagetable) )
+    if ( pagetable_val(d->exec_domain[0]->arch.guest_table) )
         adjust(&frame_table[pagetable_val(
-            d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], 1, 1);
+            d->exec_domain[0]->arch.guest_table)>>PAGE_SHIFT], 1, 1);
 
     printk("Audit %d: Done. pages=%d l1=%d l2=%d ctot=%d ttot=%d\n", d->id, i, l1, l2, ctot, ttot );
 
index 681e05a381209220f7b9296857cfae8931630e07..00838ecce2d4cdf63d4c5c6f415b7cd106892f00 100644 (file)
@@ -668,7 +668,7 @@ static void __init do_boot_cpu (int apicid)
 
     set_bit(DF_IDLETASK, &idle->d_flags);
 
-    ed->arch.pagetable = mk_pagetable(__pa(idle_pg_table));
+    ed->arch.guest_table = mk_pagetable(__pa(idle_pg_table));
 
     map_cpu_to_boot_apicid(cpu, apicid);
 
index 6b3cb557c24169d316ac201dc1a8f03c3ceafa38..cd587194b78791551bd72a22ab94f46e77ddb81e 100644 (file)
@@ -401,7 +401,7 @@ static int emulate_privileged_op(struct xen_regs *regs)
             break;
             
         case 3: /* Read CR3 */
-            *reg = pagetable_val(ed->arch.pagetable);
+            *reg = pagetable_val(ed->arch.guest_table);
             break;
 
         default:
index 2a6537b518d60e4288aa1cea0b5772db2dc3e939..21f43067e93235c3dab01e433237969610d1bfd4 100644 (file)
@@ -419,13 +419,13 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
                         d->arch.arch_vmx.cpu_cr3);
                 domain_crash(); /* need to take a clean path */
             }
-            old_base_pfn = pagetable_val(d->arch.pagetable) >> PAGE_SHIFT;
+            old_base_pfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
             /*
-             * Now mm.pagetable points to machine physical.
+             * Now arch.guest_table points to machine physical.
              */
-            d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+            d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
 
-            VMX_DBG_LOG(DBG_LEVEL_VMMU, "New mm.pagetable = %lx\n", 
+            VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx\n", 
                     (unsigned long) (pfn << PAGE_SHIFT));
 
             shadow_lock(d->domain);
@@ -466,7 +466,7 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
              * We simply invalidate the shadow.
              */
             pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
-            if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.pagetable))
+            if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table))
                 __vmx_bug(regs);
             vmx_shadow_clear_state(d->domain);
             shadow_invalidate(d);
@@ -484,7 +484,7 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
             }
             pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
             vmx_shadow_clear_state(d->domain);
-            d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+            d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
             shadow_mk_pagetable(d);
             /* 
              * mm->shadow_table should hold the next CR3 for shadow
index c9f1e9de1b42849bf0ec4a6060b4c7613ab19f76..da68139dab14ed9916c7ad12153bd9b07aef52b3 100644 (file)
@@ -219,8 +219,8 @@ void vmx_do_launch(struct exec_domain *ed)
     error |= __vmwrite(GUEST_TR_BASE, 0);
     error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
 
-    ed->arch.shadow_table = ed->arch.pagetable;
-    __vmwrite(GUEST_CR3, pagetable_val(ed->arch.pagetable));
+    ed->arch.shadow_table = ed->arch.guest_table;
+    __vmwrite(GUEST_CR3, pagetable_val(ed->arch.guest_table));
     __vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table));
     __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
 
index 38abb1d3b73a4e485d5a7bfd14dc8e2c1c4e40b9..1147a3a66ae90a193136b22e2fa50cf0a1e32b70 100644 (file)
@@ -231,7 +231,7 @@ int construct_dom0(struct domain *d,
         mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
     l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
         mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
-    ed->arch.pagetable = mk_pagetable((unsigned long)l2start);
+    ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
 
     l2tab += l2_table_offset(dsi.v_start);
     mfn = alloc_start >> PAGE_SHIFT;
index 7f39c1694cb699ea7bd65b2e5d72ed95be8fa9f5..dfa9769980726ea5914c222f42c1dd2805062e90 100644 (file)
@@ -239,7 +239,7 @@ int construct_dom0(struct domain *d,
         mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         mk_l4_pgentry(__pa(d->arch.mm_perdomain_l3) | __PAGE_HYPERVISOR);
-    ed->arch.pagetable = mk_pagetable(__pa(l4start));
+    ed->arch.guest_table = mk_pagetable(__pa(l4start));
 
     l4tab += l4_table_offset(dsi.v_start);
     mfn = alloc_start >> PAGE_SHIFT;
index 32205b63b4d39145a90482898425f86aecf954a3..70c4d1b1f3eb5b5dc8f3d3871f8a45fe90e44178 100644 (file)
@@ -100,12 +100,14 @@ struct arch_exec_domain
      * are put in this table (eg. the current GDT is mapped here).
      */
     l1_pgentry_t *perdomain_ptes;
-    pagetable_t  pagetable;
-    pagetable_t  pagetable_user;  /* x86/64: user-space pagetable. */
 
-    pagetable_t  monitor_table;
-    pagetable_t  phys_table;            /* 1:1 pagetable */
-    pagetable_t  shadow_table;
+    pagetable_t  guest_table_user;      /* x86/64: user-space pagetable. */
+    pagetable_t  guest_table;           /* guest notion of cr3 */
+    pagetable_t  shadow_table;          /* shadow of guest */
+    pagetable_t  monitor_table;         /* used in hypervisor */
+
+    pagetable_t  phys_table;            /* guest 1:1 pagetable */
+
     l2_pgentry_t *vpagetable;          /* virtual address of pagetable */
     l2_pgentry_t *shadow_vtable;       /* virtual address of shadow_table */
     l2_pgentry_t *guest_pl2e_cache;    /* guest page directory cache */
@@ -122,7 +124,7 @@ struct arch_exec_domain
 #define IDLE0_ARCH_EXEC_DOMAIN                                      \
 {                                                                   \
     perdomain_ptes: 0,                                              \
-    pagetable:      mk_pagetable(__pa(idle_pg_table))               \
+    guest_table:    mk_pagetable(__pa(idle_pg_table))               \
 }
 
 #endif /* __ASM_DOMAIN_H__ */
index ff90c2dccf2ee0f68f816df11825736f749b0919..40fab94af910e3b86bbddeba19c6e132fdc56e72 100644 (file)
@@ -726,7 +726,7 @@ static inline unsigned long gva_to_gpa(unsigned long gva)
 static inline void __shadow_mk_pagetable(struct exec_domain *ed)
 {
     struct domain *d = ed->domain;
-    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
+    unsigned long gpfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
     unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
 
     SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%p, smfn=%p)", gpfn, smfn);
@@ -747,7 +747,7 @@ static inline void shadow_mk_pagetable(struct exec_domain *ed)
      if ( unlikely(shadow_mode(ed->domain)) )
      {
          SH_VVLOG("shadow_mk_pagetable( gptbase=%p, mode=%d )",
-             pagetable_val(ed->arch.pagetable),
+             pagetable_val(ed->arch.guest_table),
                   shadow_mode(ed->domain)); 
 
          shadow_lock(ed->domain);
@@ -756,7 +756,7 @@ static inline void shadow_mk_pagetable(struct exec_domain *ed)
 
      SH_VVLOG("leaving shadow_mk_pagetable:\n"
               "( gptbase=%p, mode=%d ) sh=%p",
-              pagetable_val(ed->arch.pagetable),
+              pagetable_val(ed->arch.guest_table),
               shadow_mode(ed->domain), 
               pagetable_val(ed->arch.shadow_table) );
      }